# Source code for hysop.core.mpi
# Copyright (c) HySoP 2011-2024
#
# This file is part of HySoP software.
# See "https://particle_methods.gricad-pages.univ-grenoble-alpes.fr/hysop-doc/"
# for further info.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Hysop interface to the mpi implementation.
It contains :
* mpi basic variables (main communicator, rank, size ...)
* :class:`hysop.topology.topology.CartesianTopology` : mpi process distribution + local mesh
This package is used to hide the underlying mpi interface
in order to make any change of this interface, if required, easiest.
At this time we use mpi4py : http://mpi4py.scipy.org
"""
# Everything concerning the chosen mpi implementation is hidden from hysop
# main interface.
# Why? --> to avoid that things like mpi4py. ... spread everywhere in the
# soft so to ease a change of this implementation (if needed).
import hashlib
from mpi4py import MPI as MPI
"""MPI interface"""
# --- Host identification ---------------------------------------------------
processor_name = MPI.Get_processor_name()
"""MPI processor name"""
# Deterministic 31-bit integer derived from the processor name via sha1
# (unlike builtin hash(), sha1 gives the same value on every process, which
# is required since this value is used as a Split() color below).
processor_hash = int(hashlib.sha1(processor_name.encode("utf-8")).hexdigest(), 16) % (
    1 << 31
)
"""MPI hashed processor name as integer (fits into a 32bit signed integer)"""
# --- Global communicator ---------------------------------------------------
# Dup() isolates hysop's traffic from other users of COMM_WORLD.
main_comm = MPI.COMM_WORLD.Dup()
"""Main communicator"""
main_rank = main_comm.Get_rank()
"""Rank of the current process in main communicator"""
main_size = main_comm.Get_size()
"""Number of mpi process in main communicator"""
# --- Shared-memory (intra-node) communicator -------------------------------
# Groups ranks that can share memory; key=main_rank keeps rank ordering.
shm_comm = main_comm.Split_type(MPI.COMM_TYPE_SHARED, main_rank)
"""Shared memory communicator"""
shm_rank = shm_comm.Get_rank()
"""Shared memory process id in shm_comm (ie. NUMA node id)"""
shm_size = shm_comm.Get_size()
"""Shared memory process count in shm_comm (ie. NUMA nodes count)"""
# --- Inter-shared-memory communicator (one rank per shm group) -------------
# color is 1 for each shm-local master (shm_rank == 0) and 0 otherwise, so
# the masters end up together; the non-masters' communicator is a throwaway
# freed just below.  Split is collective, hence called by every rank.
intershm_comm = main_comm.Split(color=int(shm_rank == 0), key=main_rank)
"""Communicator between shared memory local master ranks"""
if shm_rank != 0:
    # Non-masters discard their side of the split; they learn the masters'
    # communicator size through the shm-local bcast from their master.
    intershm_comm.Free()
    intershm_comm = None
    intershm_rank = None
    intershm_size = shm_comm.bcast(None, root=0)
    is_multishm = False
else:
    intershm_rank = intershm_comm.Get_rank()
    """Communicator rank between shm masters"""
    # Matching send side of the bcast above (root of each shm_comm).
    intershm_size = shm_comm.bcast(intershm_comm.Get_size(), root=0)
    """Communicator size between shm masters"""
    is_multishm = intershm_size > 1
    """True if the programm runs on different shared memory communicators"""
# --- Per-host communicator -------------------------------------------------
# Ranks on the same physical host share processor_hash, hence the same color.
host_comm = main_comm.Split(color=processor_hash, key=main_rank)
"""Intrahost communicator"""
host_rank = host_comm.Get_rank()
"""Intrahost rank"""
host_size = host_comm.Get_size()
"""Intrahost size"""
# --- Inter-host communicator (one rank per host) ---------------------------
# Same master/non-master split pattern as intershm_comm above.
interhost_comm = main_comm.Split(color=int(host_rank == 0), key=main_rank)
"""Interhost communicator (between each host local master rank)"""
if host_rank != 0:
    interhost_comm.Free()
    interhost_comm = None
    interhost_rank = None
    # Collective over main_comm: every rank learns the host count from
    # global rank 0 (which is a host master, see the asserts below).
    interhost_size = main_comm.bcast(None, root=0)
else:
    interhost_rank = interhost_comm.Get_rank()
    """Communicator rank between hosts"""
    interhost_size = main_comm.bcast(interhost_comm.Get_size(), root=0)
    """Communicator size between hosts"""
is_multihost = interhost_size > 1
"""True if the programm runs on different hosts"""
# Alias to the MPI wall-clock timer.
Wtime = MPI.Wtime
"""Function to return elapsed time since some time in the past.
Usage:
tref = Wtime()
# proceed with some computations ...
elapsed = Wtime() - tref
# -> elapsed == time for 'some computations' on the current mpi process
"""
# Sanity check: the key=main_rank ordering used in every Split above
# guarantees that global rank 0 is also the local master everywhere.
if main_rank == 0:
    assert shm_rank == 0
    assert host_rank == 0
    assert interhost_rank == 0
def default_mpi_params():
    """Build the default :class:`MPIParams` for hysop operators.

    The returned parameters are bound to hysop's duplicated world
    communicator (``main_comm``) and tagged with the default task id.
    Imports are local to avoid a circular import at module load time.
    """
    from hysop.constants import HYSOP_DEFAULT_TASK_ID
    from hysop.tools.parameters import MPIParams

    return MPIParams(comm=main_comm, task_id=HYSOP_DEFAULT_TASK_ID)